const runtime.bucketCnt
bucketCnt is the maximum number of key/elem pairs a map bucket can hold; it is defined as abi.MapBucketCount (8 in the current runtime).
123 uses
runtime (current package)
map.go#L67: bucketCnt = abi.MapBucketCount
map.go#L73: loadFactorNum = (bucketCnt * 13 / 16) * loadFactorDen
map.go#L155: tophash [bucketCnt]uint8
map.go#L434: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L446: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
map.go#L495: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L507: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
map.go#L539: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L551: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
map.go#L621: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L626: elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
map.go#L644: elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
map.go#L668: elem = add(insertk, bucketCnt*uintptr(t.KeySize))
map.go#L734: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L755: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
map.go#L768: if i == bucketCnt-1 {
map.go#L787: i = bucketCnt - 1
map.go#L853: it.offset = uint8(r >> h.B & (bucketCnt - 1))
map.go#L914: for ; i < bucketCnt; i++ {
map.go#L915: offi := (i + it.offset) & (bucketCnt - 1)
map.go#L925: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
map.go#L1016: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L1103: return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
map.go#L1181: x.e = add(x.k, bucketCnt*uintptr(t.KeySize))
map.go#L1189: y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
map.go#L1194: e := add(k, bucketCnt*uintptr(t.KeySize))
map.go#L1195: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
map.go#L1241: if dst.i == bucketCnt {
map.go#L1245: dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
map.go#L1247: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
map.go#L1323: if t.Key.Align_ > bucketCnt {
map.go#L1326: if t.Elem.Align_ > bucketCnt {
map.go#L1335: if bucketCnt < 8 {
map.go#L1461: for i := 0; i < bucketCnt; i++ {
map.go#L1466: for ; pos < bucketCnt; pos++ {
map.go#L1472: if pos == bucketCnt {
map.go#L1478: srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
map.go#L1480: dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
map.go#L1570: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L1584: srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
map.go#L1610: offset := uint8(r >> h.B & (bucketCnt - 1))
map.go#L1639: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L1640: offi := (i + uintptr(offset)) & (bucketCnt - 1)
map.go#L1673: offset := uint8(r >> h.B & (bucketCnt - 1))
map.go#L1702: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L1703: offi := (i + uintptr(offset)) & (bucketCnt - 1)
map.go#L1712: ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
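
Almost every map.go line above computes an element address with the same expression, dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize): a bucket stores its tophash bytes first, then all bucketCnt keys packed together, then all bucketCnt elements. Packing keys together and elements together eliminates the padding an alternating key/elem layout would need when the two types have different sizes. A minimal sketch of that arithmetic, using a hypothetical toyBucket for a map[uint64]uint32 (the struct and its field types are illustrative, not the runtime's bmap, which also carries an overflow pointer):

	package main

	import (
		"fmt"
		"unsafe"
	)

	const bucketCnt = 8 // abi.MapBucketCount in the current runtime

	// toyBucket mimics the bmap layout for a hypothetical map[uint64]uint32:
	// tophash bytes first, then all keys packed together, then all elems.
	type toyBucket struct {
		tophash [bucketCnt]uint8
		keys    [bucketCnt]uint64
		elems   [bucketCnt]uint32
	}

	func main() {
		var b toyBucket
		keySize := unsafe.Sizeof(b.keys[0])
		elemSize := unsafe.Sizeof(b.elems[0])
		dataOffset := unsafe.Offsetof(b.keys) // where the key/elem data starts

		for i := uintptr(0); i < bucketCnt; i++ {
			// The same arithmetic as the map.go lines above:
			// elem i lives at base + dataOffset + bucketCnt*KeySize + i*ValueSize.
			e := unsafe.Add(unsafe.Pointer(&b), dataOffset+bucketCnt*keySize+i*elemSize)
			fmt.Println(e == unsafe.Pointer(&b.elems[i])) // true for every slot
		}
	}

Running it prints true for all eight slots, confirming that the address formula lands exactly on each element.
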
map_fast32.go#L44: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
map_fast32.go#L46: return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
map_fast32.go#L84: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
map_fast32.go#L86: return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)), true
map_fast32.go#L126: for i := uintptr(0); i < bucketCnt; i++ {
map_fast32.go#L166: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast32.go#L175: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
map_fast32.go#L216: for i := uintptr(0); i < bucketCnt; i++ {
map_fast32.go#L256: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast32.go#L265: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
map_fast32.go#L298: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
map_fast32.go#L310: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
map_fast32.go#L319: if i == bucketCnt-1 {
map_fast32.go#L338: i = bucketCnt - 1
map_fast32.go#L386: x.e = add(x.k, bucketCnt*4)
map_fast32.go#L394: y.e = add(y.k, bucketCnt*4)
map_fast32.go#L399: e := add(k, bucketCnt*4)
map_fast32.go#L400: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
map_fast32.go#L422: if dst.i == bucketCnt {
map_fast32.go#L426: dst.e = add(dst.k, bucketCnt*4)
map_fast32.go#L428: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
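
map_fast32.go is the specialization for 4-byte keys, so the generic uintptr(t.KeySize) above becomes a literal 4. Its tophash writes also mask the slot index, e.g. inserti&(bucketCnt-1): because bucketCnt is a power of two, x&(bucketCnt-1) equals x%bucketCnt, and the masked value is provably below bucketCnt, which lets the compiler drop the array bounds check (the source comments call this out explicitly). A small self-contained illustration, with made-up loop values:

	package main

	import "fmt"

	const bucketCnt = 8 // a power of two, so bucketCnt-1 works as a bit mask

	func main() {
		var tophash [bucketCnt]uint8
		for i := uintptr(0); i < 2*bucketCnt; i++ {
			// For a power-of-two bucketCnt, i&(bucketCnt-1) == i%bucketCnt,
			// and the compiler can prove the masked index is < bucketCnt,
			// so the array write below needs no bounds check.
			tophash[i&(bucketCnt-1)] = uint8(i)
			fmt.Println(i&(bucketCnt-1) == i%bucketCnt) // always true
		}
		fmt.Println(tophash)
	}
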
map_fast64.go#L44: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
map_fast64.go#L46: return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
map_fast64.go#L84: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
map_fast64.go#L86: return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)), true
map_fast64.go#L126: for i := uintptr(0); i < bucketCnt; i++ {
map_fast64.go#L166: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast64.go#L175: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
map_fast64.go#L216: for i := uintptr(0); i < bucketCnt; i++ {
map_fast64.go#L256: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast64.go#L265: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
map_fast64.go#L298: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
map_fast64.go#L312: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
map_fast64.go#L321: if i == bucketCnt-1 {
map_fast64.go#L340: i = bucketCnt - 1
map_fast64.go#L388: x.e = add(x.k, bucketCnt*8)
map_fast64.go#L396: y.e = add(y.k, bucketCnt*8)
map_fast64.go#L401: e := add(k, bucketCnt*8)
map_fast64.go#L402: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
map_fast64.go#L424: if dst.i == bucketCnt {
map_fast64.go#L428: dst.e = add(dst.k, bucketCnt*8)
map_fast64.go#L430: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
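
map_fast64.go mirrors the 32-bit variant with the key size hard-coded to 8. The x.e and y.e lines come from evacuation during growth: when the table doubles from 2^B to 2^(B+1) buckets, each old bucket's entries are split between an "x" destination at the same index and a "y" destination at the old index plus the old bucket count, decided by a single bit of the key's hash. A schematic sketch of that split, with invented bucket counts and hash values:

	package main

	import "fmt"

	func main() {
		const oldB = 2               // old table has 2^2 = 4 buckets
		newbit := uintptr(1) << oldB // the hash bit that decides x vs y
		hashes := []uintptr{0b0101, 0b1101, 0b0001, 0b0110} // invented hashes

		for _, h := range hashes {
			oldIdx := h & (1<<oldB - 1) // bucket index in the old, smaller table
			if h&newbit == 0 {
				fmt.Printf("hash %04b: stays at bucket %d (x)\n", h, oldIdx)
			} else {
				fmt.Printf("hash %04b: moves to bucket %d (y)\n", h, oldIdx+newbit)
			}
		}
	}
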
map_faststr.go#L30: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L39: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
map_faststr.go#L45: keymaybe := uintptr(bucketCnt)
map_faststr.go#L46: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L55: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
map_faststr.go#L65: if keymaybe != bucketCnt {
map_faststr.go#L71: if keymaybe != bucketCnt {
map_faststr.go#L74: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
map_faststr.go#L95: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L101: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
map_faststr.go#L125: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L134: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
map_faststr.go#L140: keymaybe := uintptr(bucketCnt)
map_faststr.go#L141: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L150: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
map_faststr.go#L160: if keymaybe != bucketCnt {
map_faststr.go#L166: if keymaybe != bucketCnt {
map_faststr.go#L169: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
map_faststr.go#L190: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L196: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
map_faststr.go#L238: for i := uintptr(0); i < bucketCnt; i++ {
map_faststr.go#L285: insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
map_faststr.go#L293: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
map_faststr.go#L328: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L338: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
map_faststr.go#L347: if i == bucketCnt-1 {
map_faststr.go#L366: i = bucketCnt - 1
map_faststr.go#L414: x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
map_faststr.go#L422: y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
map_faststr.go#L427: e := add(k, bucketCnt*2*goarch.PtrSize)
map_faststr.go#L428: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
map_faststr.go#L450: if dst.i == bucketCnt {
map_faststr.go#L454: dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
map_faststr.go#L456: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
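
map_faststr.go shows one more idiom: keymaybe := uintptr(bucketCnt) uses bucketCnt itself as a sentinel. Valid slot indices run from 0 to bucketCnt-1, so the value bucketCnt means "no candidate yet" while the lookup scans a bucket with cheap length and boundary-byte checks, deferring the full string comparison until at most one slot survives. A simplified reconstruction of the idea (the lookup function and its bucket representation are illustrative, not the runtime code, which falls back to the hash path when a second candidate appears):

	package main

	import "fmt"

	const bucketCnt = 8

	// lookup scans a bucket of string keys for want, remembering at most one
	// slot that passes the cheap checks. bucketCnt is the "no candidate yet"
	// sentinel, since real slot indices are 0..bucketCnt-1.
	func lookup(keys [bucketCnt]string, want string) (int, bool) {
		keymaybe := uintptr(bucketCnt) // sentinel: no candidate yet
		for i := uintptr(0); i < bucketCnt; i++ {
			k := keys[i]
			if len(k) != len(want) || len(k) == 0 {
				continue
			}
			if k[0] != want[0] || k[len(k)-1] != want[len(want)-1] {
				continue // cheap boundary-byte checks rule this slot out
			}
			if keymaybe != bucketCnt {
				return -1, false // two candidates: the real code uses the hash instead
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt && keys[keymaybe] == want {
			return int(keymaybe), true // the one full comparison, done once
		}
		return -1, false
	}

	func main() {
		keys := [bucketCnt]string{"alpha", "beta", "gamma", "delta"}
		fmt.Println(lookup(keys, "gamma")) // 2 true
	}
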
The pages are generated with Golds v0.6.7. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.